Key Specifications
flow_from_directory, Dataset.from_generator

Additional Specifications
# Mount Google Drive so the dataset zip and model checkpoints are reachable
# under /content/drive (Colab-only helper; prompts for authorization).
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
from sklearn.metrics import precision_score, recall_score, f1_score, accuracy_score
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing import image
from sklearn.metrics import confusion_matrix
from IPython.display import Image, display
from tensorflow.keras import regularizers
from collections import Counter
from tensorflow import keras
from IPython.utils import io
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import tensorflow as tf
import seaborn as sns
import numpy as np
import math
import cv2
import os
# Warnings off
import warnings
warnings.filterwarnings("ignore")
# Extract the dataset archive from Drive into /content and list the result.
# NOTE: these are IPython shell escapes, not plain Python — they only run in
# a notebook environment.
!unzip '/content/drive/MyDrive/Face Mask/dataset.zip'
!ls /content
dataset drive sample_data
# Resolve the unzipped dataset location relative to the working directory,
# and fix the image/batch hyper-parameters shared by every generator below.
cwd = os.getcwd()
dataset_dir = os.path.join(cwd, 'dataset')
# All images are resized to 256x256 and served in batches of 32.
img_h, img_w = 256, 256
batch_size = 32
# Training generator: rescales pixels to [0, 1] and carves out a 30%
# validation split.
# NOTE(review): Keras EfficientNet application models embed their own input
# scaling and expect raw [0, 255] pixels — confirm the 1/255 rescale here is
# intended for the EfficientNetB3 backbone built below.
train = ImageDataGenerator(
    rescale = 1./255,
    validation_split = 0.3)
# In case we wanted to perform augmentation
# (train_aug is defined but never used anywhere else in this file)
train_aug = ImageDataGenerator(
    width_shift_range = 10,
    height_shift_range = 10,
    zoom_range = 0.3,
    horizontal_flip = True,
    vertical_flip = True,
    fill_mode = 'constant',
    rescale = 1./255,
    validation_split = 0.3)
# Training split: 70% of the images, one-hot ('categorical') labels,
# reshuffled every epoch with a fixed seed for reproducibility.
train_gen = train.flow_from_directory(
    dataset_dir,
    target_size = (img_h, img_w),
    batch_size = batch_size,
    class_mode = 'categorical',
    subset = 'training',
    shuffle = True,
    seed = 42)
# Validation split: remaining 30%. shuffle=False keeps the file order stable
# so validation_gen.classes lines up with model.predict() output later on.
validation_gen = train.flow_from_directory(
    dataset_dir,
    target_size = (img_h, img_w),
    batch_size = batch_size,
    class_mode = 'categorical',
    subset = 'validation',
    shuffle = False,
    seed = 42)
# Class name -> index mapping inferred from the dataset directory names.
classes = train_gen.class_indices
num_classes = len(classes)
print("There are ", num_classes, "classes: ", classes)
# Inverse mapping (index -> class name), handy for decoding predictions.
reversed_classes = {value : key for (key, value) in classes.items()}
print(reversed_classes)
# Wrap the Keras generators as tf.data pipelines. Each batch is a pair of
# (images [B, H, W, 3] float32, one-hot labels [B, num_classes] float32).
# NOTE(review): output_types/output_shapes are deprecated in newer TF in
# favor of a single output_signature argument — works here, worth migrating.
train_dataset = tf.data.Dataset.from_generator(
    lambda: train_gen,
    output_types = (tf.float32, tf.float32),
    output_shapes = ([None, img_h, img_w, 3], [None, num_classes]))
# repeat() lets model.fit draw steps_per_epoch batches every epoch, forever.
train_dataset = train_dataset.repeat()
validation_dataset = tf.data.Dataset.from_generator(
    lambda: validation_gen,
    output_types = (tf.float32, tf.float32),
    output_shapes = ([None, img_h, img_w, 3], [None, num_classes]))
validation_dataset = validation_dataset.repeat()
Found 3930 images belonging to 3 classes.
Found 1684 images belonging to 3 classes.
There are 3 classes: {'0': 0, '1': 1, '2': 2}
{0: '0', 1: '1', 2: '2'}
The tf.data.Dataset.from_generator allows you to generate your own dataset at runtime without any storage hassles.
You can create a dataset whose elements are defined by a generator function.
The parameters of tf.data.Dataset.from_generator are:
- the tf.DType of the elements yielded by the generator function (e.g. tf.string, tf.bool, tf.float32, tf.int32);
- the tf.TensorShape of the elements yielded by the generator function.
# Visualize some images
# Visual sanity check: pull 15 batches and show the first image of each.
# Uses the builtin next() — the generator's .next() method is a deprecated
# Python-2 leftover. The labels of each batch are not needed here.
plt.figure(figsize=(15, 9))
for i in range(15):
    batch_imgs, _ = next(train_gen)
    plt.subplot(3, 5, i + 1)
    plt.imshow(batch_imgs[0])
    plt.axis('off')
plt.subplots_adjust(wspace=.1, hspace=.01)
# Visualize some images of the first batch with their ground-truth class.
plt.figure(figsize=(15, 10))
# To not have unwanted text output (warnings, etc.)
with io.capture_output() as captured:
    # Fetch the first batch ONCE: indexing train_gen[0] reloads and
    # re-processes the whole batch on every access, so the original code
    # recomputed it up to four times per image.
    imgs, labels = train_gen[0]
    # One-hot label index -> human-readable title.
    titles = {0: 'No masks', 1: 'All masks', 2: 'Some masks'}
    for i in range(15):
        plt.subplot(3, 5, i + 1)
        plt.imshow(imgs[i])
        plt.title(titles[int(np.argmax(labels[i]))])
        plt.axis('off')
plt.subplots_adjust(wspace=.01, hspace=.2)
# Count the number of images for each class and construct a dict
# {class_index: num_images}. Counter is built once (the original called it
# twice) and this works for any number of classes, not just three.
class_dict = dict(Counter(train_gen.classes))
# Plot the class distribution as a bar chart to eyeball imbalance.
fig, ax = plt.subplots(figsize=(5,5));
ax.bar(class_dict.keys(), class_dict.values());
# Base model: EfficientNetB3 backbone pretrained on ImageNet, classifier
# head removed, taking 256x256 RGB inputs.
# NOTE(review): Keras EfficientNet models include their own input scaling
# and expect raw [0, 255] pixels, while the generators rescale by 1/255 —
# confirm this mismatch is intended.
base_model = tf.keras.applications.EfficientNetB3(
    include_top = False,
    weights = "imagenet",
    input_shape = (img_h, img_w, 3))
# Unfreeze all the layers
for layer in base_model.layers:
    layer.trainable = True
# Freeze the first 20 layers (out of 384)
for layer in base_model.layers[:20]:
    layer.trainable = False
# Build the model.
# tf.identity is a no-op, but it materializes a named layer ('tf.identity')
# that the Grad-CAM code below looks up via last_conv_layer_name — do not
# remove it.
x = tf.identity(base_model.output)
x = tf.keras.layers.AveragePooling2D(pool_size=(5,5))(x)
x = tf.keras.layers.Flatten()(x)
# 512-unit head with L2 regularization and dropout to curb overfitting.
x = tf.keras.layers.Dense(512, activation='relu', kernel_regularizer=regularizers.l2(0.001))(x)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.Dense(num_classes, activation='softmax')(x)
model = tf.keras.models.Model(base_model.input, x)
Downloading data from https://storage.googleapis.com/keras-applications/efficientnetb3_notop.h5 43941888/43941136 [==============================] - 0s 0us/step 43950080/43941136 [==============================] - 0s 0us/step
# Model visualization: render the architecture graph to model.png with
# tensor shapes and layer names.
tf.keras.utils.plot_model(model, to_file='model.png', show_shapes=True, show_layer_names=True)
# To get exact numbers
model.summary()
# Output:
# ==================================================================================================
# Total params: 11,572,018
# Trainable params: 11,480,993
# Non-trainable params: 91,025
# __________________________________________________________________________________________________
# Maximum number of epochs (EarlyStopping below halts training much sooner).
num_epochs = 500
# Optimizer. Modern Keras spells the argument `learning_rate`; `lr` and
# `decay` are deprecated aliases (and decay=0.0 was the default anyway).
optimizer = tf.keras.optimizers.Adadelta(learning_rate=1.0, rho=0.95, epsilon=1e-08)
# Loss function: one-hot labels -> categorical cross-entropy.
loss_function = tf.keras.losses.CategoricalCrossentropy()
# Metrics
metrics = ['accuracy']
# Compile
model.compile(optimizer=optimizer, loss=loss_function, metrics=metrics)
# Path on Drive where the best checkpoint is persisted.
filepath = '/content/drive/MyDrive/Face Mask/' + 'model_02.hdf5'
# Callbacks
# Stop after 10 epochs without val_loss improvement.
# NOTE(review): restore_best_weights=False means the in-memory model ends
# with its *last* weights; the best weights live only in the checkpoint
# file written by ModelCheckpoint below (it is reloaded later).
Early_Stopping = tf.keras.callbacks.EarlyStopping(
    monitor = "val_loss",
    restore_best_weights = False,
    verbose = 1,
    patience = 10)
# Multiply the learning rate by 0.2 after 3 stagnant epochs.
Learning_Rate_Adapter = tf.keras.callbacks.ReduceLROnPlateau(
    monitor = 'val_loss',
    factor = 0.2,
    patience = 3,
    verbose = 1,
    mode = 'auto')
# Persist only the best (lowest val_loss) model to Drive.
Best_model_save = tf.keras.callbacks.ModelCheckpoint(
    filepath = filepath,
    save_best_only = True,
    monitor = 'val_loss',
    mode = 'min',
    verbose = True)
callback = [Early_Stopping, Learning_Rate_Adapter, Best_model_save]
# Train. Both datasets repeat() forever, so steps_per_epoch and
# validation_steps bound each epoch to exactly one pass over the
# underlying generators.
history = model.fit(
    train_dataset,
    epochs = num_epochs,
    steps_per_epoch = len(train_gen),
    validation_data = validation_dataset,
    validation_steps = len(validation_gen),
    callbacks = callback)
Epoch 1/500 123/123 [==============================] - ETA: 0s - loss: 1.4656 - accuracy: 0.6399 Epoch 1: val_loss improved from inf to 1.90421, saving model to /content/drive/MyDrive/Face Mask/model_02.hdf5 123/123 [==============================] - 146s 959ms/step - loss: 1.4656 - accuracy: 0.6399 - val_loss: 1.9042 - val_accuracy: 0.3236 - lr: 1.0000 Epoch 2/500 123/123 [==============================] - ETA: 0s - loss: 1.0846 - accuracy: 0.8153 Epoch 2: val_loss improved from 1.90421 to 1.75512, saving model to /content/drive/MyDrive/Face Mask/model_02.hdf5 123/123 [==============================] - 113s 922ms/step - loss: 1.0846 - accuracy: 0.8153 - val_loss: 1.7551 - val_accuracy: 0.3201 - lr: 1.0000 Epoch 3/500 123/123 [==============================] - ETA: 0s - loss: 0.8250 - accuracy: 0.8992 Epoch 3: val_loss did not improve from 1.75512 123/123 [==============================] - 110s 898ms/step - loss: 0.8250 - accuracy: 0.8992 - val_loss: 4.5090 - val_accuracy: 0.4840 - lr: 1.0000 Epoch 4/500 123/123 [==============================] - ETA: 0s - loss: 0.6472 - accuracy: 0.9440 Epoch 4: val_loss did not improve from 1.75512 123/123 [==============================] - 110s 893ms/step - loss: 0.6472 - accuracy: 0.9440 - val_loss: 5.5548 - val_accuracy: 0.3747 - lr: 1.0000 Epoch 5/500 123/123 [==============================] - ETA: 0s - loss: 0.5201 - accuracy: 0.9628 Epoch 5: ReduceLROnPlateau reducing learning rate to 0.2. 
Epoch 5: val_loss did not improve from 1.75512 123/123 [==============================] - 110s 893ms/step - loss: 0.5201 - accuracy: 0.9628 - val_loss: 1.9560 - val_accuracy: 0.4139 - lr: 1.0000 Epoch 6/500 123/123 [==============================] - ETA: 0s - loss: 0.4295 - accuracy: 0.9827 Epoch 6: val_loss improved from 1.75512 to 1.25283, saving model to /content/drive/MyDrive/Face Mask/model_02.hdf5 123/123 [==============================] - 112s 911ms/step - loss: 0.4295 - accuracy: 0.9827 - val_loss: 1.2528 - val_accuracy: 0.7933 - lr: 0.2000 Epoch 7/500 123/123 [==============================] - ETA: 0s - loss: 0.3952 - accuracy: 0.9868 Epoch 7: val_loss improved from 1.25283 to 1.11933, saving model to /content/drive/MyDrive/Face Mask/model_02.hdf5 123/123 [==============================] - 113s 921ms/step - loss: 0.3952 - accuracy: 0.9868 - val_loss: 1.1193 - val_accuracy: 0.8337 - lr: 0.2000 Epoch 8/500 123/123 [==============================] - ETA: 0s - loss: 0.3708 - accuracy: 0.9888 Epoch 8: val_loss did not improve from 1.11933 123/123 [==============================] - 110s 896ms/step - loss: 0.3708 - accuracy: 0.9888 - val_loss: 1.1940 - val_accuracy: 0.8254 - lr: 0.2000 Epoch 9/500 123/123 [==============================] - ETA: 0s - loss: 0.3425 - accuracy: 0.9936 Epoch 9: val_loss did not improve from 1.11933 123/123 [==============================] - 110s 895ms/step - loss: 0.3425 - accuracy: 0.9936 - val_loss: 1.1494 - val_accuracy: 0.8403 - lr: 0.2000 Epoch 10/500 123/123 [==============================] - ETA: 0s - loss: 0.3214 - accuracy: 0.9934 Epoch 10: ReduceLROnPlateau reducing learning rate to 0.04000000059604645. 
Epoch 10: val_loss did not improve from 1.11933 123/123 [==============================] - 109s 891ms/step - loss: 0.3214 - accuracy: 0.9934 - val_loss: 1.1847 - val_accuracy: 0.8373 - lr: 0.2000 Epoch 11/500 123/123 [==============================] - ETA: 0s - loss: 0.3131 - accuracy: 0.9921 Epoch 11: val_loss did not improve from 1.11933 123/123 [==============================] - 110s 893ms/step - loss: 0.3131 - accuracy: 0.9921 - val_loss: 1.1199 - val_accuracy: 0.8504 - lr: 0.0400 Epoch 12/500 123/123 [==============================] - ETA: 0s - loss: 0.3068 - accuracy: 0.9906 Epoch 12: val_loss improved from 1.11933 to 1.10653, saving model to /content/drive/MyDrive/Face Mask/model_02.hdf5 123/123 [==============================] - 112s 915ms/step - loss: 0.3068 - accuracy: 0.9906 - val_loss: 1.1065 - val_accuracy: 0.8498 - lr: 0.0400 Epoch 13/500 123/123 [==============================] - ETA: 0s - loss: 0.3020 - accuracy: 0.9919 Epoch 13: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 894ms/step - loss: 0.3020 - accuracy: 0.9919 - val_loss: 1.1511 - val_accuracy: 0.8527 - lr: 0.0400 Epoch 14/500 123/123 [==============================] - ETA: 0s - loss: 0.2964 - accuracy: 0.9944 Epoch 14: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 892ms/step - loss: 0.2964 - accuracy: 0.9944 - val_loss: 1.1251 - val_accuracy: 0.8539 - lr: 0.0400 Epoch 15/500 123/123 [==============================] - ETA: 0s - loss: 0.2925 - accuracy: 0.9926 Epoch 15: ReduceLROnPlateau reducing learning rate to 0.007999999821186066. 
Epoch 15: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 894ms/step - loss: 0.2925 - accuracy: 0.9926 - val_loss: 1.1122 - val_accuracy: 0.8498 - lr: 0.0400 Epoch 16/500 123/123 [==============================] - ETA: 0s - loss: 0.2906 - accuracy: 0.9939 Epoch 16: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 892ms/step - loss: 0.2906 - accuracy: 0.9939 - val_loss: 1.1170 - val_accuracy: 0.8486 - lr: 0.0080 Epoch 17/500 123/123 [==============================] - ETA: 0s - loss: 0.2878 - accuracy: 0.9936 Epoch 17: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 895ms/step - loss: 0.2878 - accuracy: 0.9936 - val_loss: 1.1215 - val_accuracy: 0.8480 - lr: 0.0080 Epoch 18/500 123/123 [==============================] - ETA: 0s - loss: 0.2865 - accuracy: 0.9939 Epoch 18: ReduceLROnPlateau reducing learning rate to 0.0015999998897314072. Epoch 18: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 892ms/step - loss: 0.2865 - accuracy: 0.9939 - val_loss: 1.1224 - val_accuracy: 0.8492 - lr: 0.0080 Epoch 19/500 123/123 [==============================] - ETA: 0s - loss: 0.2842 - accuracy: 0.9952 Epoch 19: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 892ms/step - loss: 0.2842 - accuracy: 0.9952 - val_loss: 1.1224 - val_accuracy: 0.8486 - lr: 0.0016 Epoch 20/500 123/123 [==============================] - ETA: 0s - loss: 0.2842 - accuracy: 0.9949 Epoch 20: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 895ms/step - loss: 0.2842 - accuracy: 0.9949 - val_loss: 1.1163 - val_accuracy: 0.8486 - lr: 0.0016 Epoch 21/500 123/123 [==============================] - ETA: 0s - loss: 0.2898 - accuracy: 0.9908 Epoch 21: ReduceLROnPlateau reducing learning rate to 0.0003199999686330557. 
Epoch 21: val_loss did not improve from 1.10653 123/123 [==============================] - 114s 925ms/step - loss: 0.2898 - accuracy: 0.9908 - val_loss: 1.1270 - val_accuracy: 0.8474 - lr: 0.0016 Epoch 22/500 123/123 [==============================] - ETA: 0s - loss: 0.2861 - accuracy: 0.9947 Epoch 22: val_loss did not improve from 1.10653 123/123 [==============================] - 110s 892ms/step - loss: 0.2861 - accuracy: 0.9947 - val_loss: 1.1232 - val_accuracy: 0.8486 - lr: 3.2000e-04 Epoch 22: early stopping
# Plot training vs. validation loss over epochs.
plt.figure(figsize=(9, 5))
for series, lbl in (('loss', 'Training Loss'), ('val_loss', 'Validation Loss')):
    plt.plot(history.history[series], label=lbl)
plt.legend()
plt.xlabel('Epochs')
# Load and evaluate the best model version (checkpoint with lowest val_loss).
model = tf.keras.models.load_model(filepath)
# Class names for the confusion-matrix axis labels.
keys = classes.keys()
# One probability vector per validation image, in generator file order.
values = model.predict(validation_gen)
# Predicted class = argmax over class probabilities, vectorized over all
# rows at once (the original appended argmaxes one by one in a loop).
predicted_labels = np.argmax(values, axis=-1)
# Ground-truth integer labels; valid because validation_gen was built with
# shuffle=False, so .classes is aligned with predict() order.
correct_labels = np.asarray(validation_gen.classes)
# Build the confusion matrix
cm = confusion_matrix(correct_labels, predicted_labels)
# Normalize each row so cells read as per-class recall fractions.
cmn = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
# Plot
fig, ax = plt.subplots(figsize=(6,6))
sns.heatmap(cmn, annot=True, fmt='.3f', xticklabels=keys, yticklabels=keys);
plt.ylabel('Actual');
plt.xlabel('Predicted');
# Print precision/recall/accuracy/F1 under the three multiclass averaging
# schemes:
#   micro    - count total TP/FP/FN globally (equals accuracy here);
#   macro    - unweighted mean over classes (ignores class imbalance);
#   weighted - mean over classes weighted by class support.
# The original passed pos_label='positive', which scikit-learn ignores (and
# warns about) whenever average != 'binary', so it is dropped.
for avg in ('micro', 'macro', 'weighted'):
    print('-----------------------------')
    print(avg.upper())
    print('-----------------------------')
    print('Precision: %.5f' % precision_score(correct_labels, predicted_labels, average=avg))
    print('Recall: %.5f' % recall_score(correct_labels, predicted_labels, average=avg))
    print('Accuracy: %.5f' % accuracy_score(correct_labels, predicted_labels))
    print('F1: %.5f' % f1_score(correct_labels, predicted_labels, average=avg))
----------------------------- MICRO ----------------------------- Precision: 0.84976 Recall: 0.84976 Accuracy: 0.84976 F1: 0.84976 ----------------------------- MACRO ----------------------------- Precision: 0.84730 Recall: 0.84810 Accuracy: 0.84976 F1: 0.84723 ----------------------------- WEIGHTED ----------------------------- Precision: 0.84818 Recall: 0.84976 Accuracy: 0.84976 F1: 0.84851
# Load the best model version and visualize held-out test images with the
# predicted class as the subplot title.
model = tf.keras.models.load_model(filepath)
plt.figure(figsize=(15,10))
path_img_test = '/content/drive/MyDrive/Face Mask/test'
# Class index -> title (matches the mapping used during training).
class_titles = {0: 'No masks', 1: 'All masks', 2: 'Some masks'}
for n, fname in enumerate(os.listdir(path_img_test)):
    # Load image
    img = image.load_img(os.path.join(path_img_test, fname), target_size=(img_h, img_w))
    # Define the plot
    plt.subplot(3, 5, n + 1)
    plt.imshow(img)
    plt.axis("off")
    # Predict ONCE per image; the original called model.predict up to twice
    # per image, and the np.vstack over a single array was a no-op.
    x = np.expand_dims(image.img_to_array(img), axis=0)
    pred = int(np.argmax(model.predict(x)))
    plt.title(class_titles.get(pred, 'Some masks'))
plt.subplots_adjust(wspace=.01, hspace=.2)
from tensorflow.keras.preprocessing import image
import matplotlib.cm as cm
def get_img_array(img_path, size):
    """Load the image at img_path, resize it to `size`, and return it as a
    float32 array with a leading batch dimension of 1, i.e. shape
    (1, size[0], size[1], 3)."""
    pil_img = keras.preprocessing.image.load_img(img_path, target_size=size)
    arr = keras.preprocessing.image.img_to_array(pil_img)
    # Prepend the batch axis so the array can be fed straight to the model.
    return arr[np.newaxis, ...]
def make_gradcam_heatmap(img_array, model, last_conv_layer_name, pred_index=None):
    """Compute a Grad-CAM heatmap for `img_array`.

    Gradients of the class score (top predicted class, or `pred_index` if
    given) are taken w.r.t. the activations of `last_conv_layer_name`; the
    channel-wise mean gradients weight the feature maps, and the weighted
    sum — clipped at 0 and normalized to [0, 1] — is returned as a 2-D
    numpy array.
    """
    # Model mapping the input image to (last-conv activations, predictions).
    grad_model = tf.keras.models.Model(
        [model.inputs], [model.get_layer(last_conv_layer_name).output, model.output]
    )
    # Gradient of the chosen class score w.r.t. the conv feature maps.
    with tf.GradientTape() as tape:
        last_conv_layer_output, preds = grad_model(img_array)
        if pred_index is None:
            pred_index = tf.argmax(preds[0])
        class_channel = preds[:, pred_index]
    grads = tape.gradient(class_channel, last_conv_layer_output)
    # Mean gradient intensity per feature-map channel ("how important is
    # this channel for the chosen class").
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    # Weight each channel by its importance and sum the channels.
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)
    # Clip negatives and normalize to [0, 1]. divide_no_nan guards the
    # degenerate all-non-positive case, where the original produced NaNs
    # from a 0/0 division.
    relu_map = tf.maximum(heatmap, 0)
    heatmap = tf.math.divide_no_nan(relu_map, tf.math.reduce_max(relu_map))
    return heatmap.numpy()
def save_and_display_gradcam(img_path, heatmap, cam_path="cam.jpg", alpha=0.4):
    """Colorize `heatmap` with the jet colormap, blend it over the original
    image at `img_path` with weight `alpha`, save the result to `cam_path`,
    and return `cam_path`."""
    # Original image as a float array (kept at its native resolution).
    base = keras.preprocessing.image.img_to_array(
        keras.preprocessing.image.load_img(img_path))
    # Map heatmap values from [0, 1] to byte indices into the colormap.
    indices = np.uint8(255 * heatmap)
    # RGB rows of the jet colormap (dropping the alpha column).
    jet_colors = cm.get_cmap("jet")(np.arange(256))[:, :3]
    # Colorize, then resize the heatmap image to match the original.
    colored = keras.preprocessing.image.array_to_img(jet_colors[indices])
    colored = colored.resize((base.shape[1], base.shape[0]))
    colored = keras.preprocessing.image.img_to_array(colored)
    # Superimpose the heatmap on the original image and save it.
    overlay = keras.preprocessing.image.array_to_img(colored * alpha + base)
    overlay.save(cam_path)
    return cam_path
# Grad-CAM for one test image.
# NOTE(review): xception's preprocess_input maps pixels to [-1, 1], while
# the model was trained on 1/255-rescaled inputs — verify this mismatch is
# intended. (The unused decode_predictions alias was removed.)
preprocess_input = keras.applications.xception.preprocess_input
# Name of the no-op identity layer inserted after the backbone at build time;
# Grad-CAM taps its activations.
last_conv_layer_name = 'tf.identity'
# The local path to our target image
img_path = '/content/drive/MyDrive/Face Mask/test/10040.jpg'
img_array = preprocess_input(get_img_array(img_path, size=(img_h, img_w)))
# Remove last layer's softmax so gradients flow from the raw logits.
model.layers[-1].activation = None
# Forward pass (predictions are not inspected further here).
preds = model.predict(img_array)
# Generate class activation heatmap
heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)
# Display heatmap
print("The heatmap: ")
plt.matshow(heatmap)
plt.axis('off')
plt.show()
# Display the image. Bind the result to a distinct name: the original used
# `image`, which shadowed the keras.preprocessing `image` module and forced
# the defensive re-import further down.
print("\nThe image with the heatmap: ")
cam_display = Image(save_and_display_gradcam(img_path, heatmap), width = 500, height = 350)
display(cam_display)
The heatmap:
The image with the heatmap:
# Re-import in case `image` was shadowed by an earlier cell.
from keras.preprocessing import image
# Visualize every test image with its Grad-CAM overlay and predicted class.
plt.figure(figsize=(15,10))
# For the GradCam (remove the last layer - softmax)
model.layers[-1].activation = None
# Class index -> title (matches the mapping used during training).
class_titles = {0: 'No masks', 1: 'All masks', 2: 'Some masks'}
for n, fname in enumerate(os.listdir(path_img_test)):
    img_path = os.path.join(path_img_test, fname)
    # Load the raw image used for the class prediction.
    img = image.load_img(img_path, target_size=(img_h, img_w))
    # GradCam. Preprocess ONCE — the original called get_img_array twice
    # per image and discarded the first result.
    img_array = preprocess_input(get_img_array(img_path, size=(img_h, img_w)))
    heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)
    cam_img = image.load_img(save_and_display_gradcam(img_path, heatmap),
                             target_size=(img_h, img_w))
    # Define the plot
    plt.subplot(3, 5, n + 1)
    plt.imshow(cam_img)
    plt.axis("off")
    # Predict ONCE per image (the original called model.predict up to twice).
    x = np.expand_dims(image.img_to_array(img), axis=0)
    pred = int(np.argmax(model.predict(x)))
    plt.title(class_titles.get(pred, 'Some masks'))
plt.subplots_adjust(wspace=.01, hspace=.2)